%pip install pandas
%pip install tensorflow
%pip install matplotlib
%pip install numpy
%pip install scikit-learn
%pip install seaborn
%pip install tensorflow-gan
%pip install tqdm
Requirement already satisfied: pandas in ./.venv/lib/python3.9/site-packages (2.3.1) Requirement already satisfied: numpy>=1.22.4 in ./.venv/lib/python3.9/site-packages (from pandas) (2.0.2) Requirement already satisfied: python-dateutil>=2.8.2 in ./.venv/lib/python3.9/site-packages (from pandas) (2.9.0.post0) Requirement already satisfied: pytz>=2020.1 in ./.venv/lib/python3.9/site-packages (from pandas) (2025.2) Requirement already satisfied: tzdata>=2022.7 in ./.venv/lib/python3.9/site-packages (from pandas) (2025.2) Requirement already satisfied: six>=1.5 in ./.venv/lib/python3.9/site-packages (from python-dateutil>=2.8.2->pandas) (1.17.0) Note: you may need to restart the kernel to use updated packages. Requirement already satisfied: tensorflow in ./.venv/lib/python3.9/site-packages (2.19.0) Requirement already satisfied: absl-py>=1.0.0 in ./.venv/lib/python3.9/site-packages (from tensorflow) (2.3.1) Requirement already satisfied: astunparse>=1.6.0 in ./.venv/lib/python3.9/site-packages (from tensorflow) (1.6.3) Requirement already satisfied: flatbuffers>=24.3.25 in ./.venv/lib/python3.9/site-packages (from tensorflow) (25.2.10) Requirement already satisfied: gast!=0.5.0,!=0.5.1,!=0.5.2,>=0.2.1 in ./.venv/lib/python3.9/site-packages (from tensorflow) (0.6.0) Requirement already satisfied: google-pasta>=0.1.1 in ./.venv/lib/python3.9/site-packages (from tensorflow) (0.2.0) Requirement already satisfied: libclang>=13.0.0 in ./.venv/lib/python3.9/site-packages (from tensorflow) (18.1.1) Requirement already satisfied: opt-einsum>=2.3.2 in ./.venv/lib/python3.9/site-packages (from tensorflow) (3.4.0) Requirement already satisfied: packaging in ./.venv/lib/python3.9/site-packages (from tensorflow) (25.0) Requirement already satisfied: protobuf!=4.21.0,!=4.21.1,!=4.21.2,!=4.21.3,!=4.21.4,!=4.21.5,<6.0.0dev,>=3.20.3 in ./.venv/lib/python3.9/site-packages (from tensorflow) (5.29.5) Requirement already satisfied: requests<3,>=2.21.0 in 
./.venv/lib/python3.9/site-packages (from tensorflow) (2.32.4) Requirement already satisfied: setuptools in ./.venv/lib/python3.9/site-packages (from tensorflow) (58.0.4) Requirement already satisfied: six>=1.12.0 in ./.venv/lib/python3.9/site-packages (from tensorflow) (1.17.0) Requirement already satisfied: termcolor>=1.1.0 in ./.venv/lib/python3.9/site-packages (from tensorflow) (3.1.0) Requirement already satisfied: typing-extensions>=3.6.6 in ./.venv/lib/python3.9/site-packages (from tensorflow) (4.14.1) Requirement already satisfied: wrapt>=1.11.0 in ./.venv/lib/python3.9/site-packages (from tensorflow) (1.17.2) Requirement already satisfied: grpcio<2.0,>=1.24.3 in ./.venv/lib/python3.9/site-packages (from tensorflow) (1.73.1) Requirement already satisfied: tensorboard~=2.19.0 in ./.venv/lib/python3.9/site-packages (from tensorflow) (2.19.0) Requirement already satisfied: keras>=3.5.0 in ./.venv/lib/python3.9/site-packages (from tensorflow) (3.10.0) Requirement already satisfied: numpy<2.2.0,>=1.26.0 in ./.venv/lib/python3.9/site-packages (from tensorflow) (2.0.2) Requirement already satisfied: h5py>=3.11.0 in ./.venv/lib/python3.9/site-packages (from tensorflow) (3.14.0) Requirement already satisfied: ml-dtypes<1.0.0,>=0.5.1 in ./.venv/lib/python3.9/site-packages (from tensorflow) (0.5.1) Requirement already satisfied: tensorflow-io-gcs-filesystem>=0.23.1 in ./.venv/lib/python3.9/site-packages (from tensorflow) (0.37.1) Requirement already satisfied: charset_normalizer<4,>=2 in ./.venv/lib/python3.9/site-packages (from requests<3,>=2.21.0->tensorflow) (3.4.2) Requirement already satisfied: idna<4,>=2.5 in ./.venv/lib/python3.9/site-packages (from requests<3,>=2.21.0->tensorflow) (3.10) Requirement already satisfied: urllib3<3,>=1.21.1 in ./.venv/lib/python3.9/site-packages (from requests<3,>=2.21.0->tensorflow) (2.5.0) Requirement already satisfied: certifi>=2017.4.17 in ./.venv/lib/python3.9/site-packages (from requests<3,>=2.21.0->tensorflow) (2025.7.14) 
Requirement already satisfied: markdown>=2.6.8 in ./.venv/lib/python3.9/site-packages (from tensorboard~=2.19.0->tensorflow) (3.8.2) Requirement already satisfied: tensorboard-data-server<0.8.0,>=0.7.0 in ./.venv/lib/python3.9/site-packages (from tensorboard~=2.19.0->tensorflow) (0.7.2) Requirement already satisfied: werkzeug>=1.0.1 in ./.venv/lib/python3.9/site-packages (from tensorboard~=2.19.0->tensorflow) (3.1.3) Requirement already satisfied: wheel<1.0,>=0.23.0 in ./.venv/lib/python3.9/site-packages (from astunparse>=1.6.0->tensorflow) (0.45.1) Requirement already satisfied: rich in ./.venv/lib/python3.9/site-packages (from keras>=3.5.0->tensorflow) (14.0.0) Requirement already satisfied: namex in ./.venv/lib/python3.9/site-packages (from keras>=3.5.0->tensorflow) (0.1.0) Requirement already satisfied: optree in ./.venv/lib/python3.9/site-packages (from keras>=3.5.0->tensorflow) (0.16.0) Requirement already satisfied: importlib-metadata>=4.4 in ./.venv/lib/python3.9/site-packages (from markdown>=2.6.8->tensorboard~=2.19.0->tensorflow) (8.7.0) Requirement already satisfied: zipp>=3.20 in ./.venv/lib/python3.9/site-packages (from importlib-metadata>=4.4->markdown>=2.6.8->tensorboard~=2.19.0->tensorflow) (3.23.0) Requirement already satisfied: MarkupSafe>=2.1.1 in ./.venv/lib/python3.9/site-packages (from werkzeug>=1.0.1->tensorboard~=2.19.0->tensorflow) (3.0.2) Requirement already satisfied: markdown-it-py>=2.2.0 in ./.venv/lib/python3.9/site-packages (from rich->keras>=3.5.0->tensorflow) (3.0.0) Requirement already satisfied: pygments<3.0.0,>=2.13.0 in ./.venv/lib/python3.9/site-packages (from rich->keras>=3.5.0->tensorflow) (2.19.2) Requirement already satisfied: mdurl~=0.1 in ./.venv/lib/python3.9/site-packages (from markdown-it-py>=2.2.0->rich->keras>=3.5.0->tensorflow) (0.1.2) Note: you may need to restart the kernel to use updated packages. 
Requirement already satisfied: matplotlib in ./.venv/lib/python3.9/site-packages (3.9.4) Requirement already satisfied: contourpy>=1.0.1 in ./.venv/lib/python3.9/site-packages (from matplotlib) (1.3.0) Requirement already satisfied: cycler>=0.10 in ./.venv/lib/python3.9/site-packages (from matplotlib) (0.12.1) Requirement already satisfied: fonttools>=4.22.0 in ./.venv/lib/python3.9/site-packages (from matplotlib) (4.59.0) Requirement already satisfied: kiwisolver>=1.3.1 in ./.venv/lib/python3.9/site-packages (from matplotlib) (1.4.7) Requirement already satisfied: numpy>=1.23 in ./.venv/lib/python3.9/site-packages (from matplotlib) (2.0.2) Requirement already satisfied: packaging>=20.0 in ./.venv/lib/python3.9/site-packages (from matplotlib) (25.0) Requirement already satisfied: pillow>=8 in ./.venv/lib/python3.9/site-packages (from matplotlib) (11.3.0) Requirement already satisfied: pyparsing>=2.3.1 in ./.venv/lib/python3.9/site-packages (from matplotlib) (3.2.3) Requirement already satisfied: python-dateutil>=2.7 in ./.venv/lib/python3.9/site-packages (from matplotlib) (2.9.0.post0) Requirement already satisfied: importlib-resources>=3.2.0 in ./.venv/lib/python3.9/site-packages (from matplotlib) (6.5.2) Requirement already satisfied: zipp>=3.1.0 in ./.venv/lib/python3.9/site-packages (from importlib-resources>=3.2.0->matplotlib) (3.23.0) Requirement already satisfied: six>=1.5 in ./.venv/lib/python3.9/site-packages (from python-dateutil>=2.7->matplotlib) (1.17.0) Note: you may need to restart the kernel to use updated packages. Requirement already satisfied: numpy in ./.venv/lib/python3.9/site-packages (2.0.2) Note: you may need to restart the kernel to use updated packages. 
Requirement already satisfied: scikit-learn in ./.venv/lib/python3.9/site-packages (1.6.1) Requirement already satisfied: numpy>=1.19.5 in ./.venv/lib/python3.9/site-packages (from scikit-learn) (2.0.2) Requirement already satisfied: scipy>=1.6.0 in ./.venv/lib/python3.9/site-packages (from scikit-learn) (1.13.1) Requirement already satisfied: joblib>=1.2.0 in ./.venv/lib/python3.9/site-packages (from scikit-learn) (1.5.1) Requirement already satisfied: threadpoolctl>=3.1.0 in ./.venv/lib/python3.9/site-packages (from scikit-learn) (3.6.0) Note: you may need to restart the kernel to use updated packages. Requirement already satisfied: seaborn in ./.venv/lib/python3.9/site-packages (0.13.2) Requirement already satisfied: numpy!=1.24.0,>=1.20 in ./.venv/lib/python3.9/site-packages (from seaborn) (2.0.2) Requirement already satisfied: pandas>=1.2 in ./.venv/lib/python3.9/site-packages (from seaborn) (2.3.1) Requirement already satisfied: matplotlib!=3.6.1,>=3.4 in ./.venv/lib/python3.9/site-packages (from seaborn) (3.9.4) Requirement already satisfied: contourpy>=1.0.1 in ./.venv/lib/python3.9/site-packages (from matplotlib!=3.6.1,>=3.4->seaborn) (1.3.0) Requirement already satisfied: cycler>=0.10 in ./.venv/lib/python3.9/site-packages (from matplotlib!=3.6.1,>=3.4->seaborn) (0.12.1) Requirement already satisfied: fonttools>=4.22.0 in ./.venv/lib/python3.9/site-packages (from matplotlib!=3.6.1,>=3.4->seaborn) (4.59.0) Requirement already satisfied: kiwisolver>=1.3.1 in ./.venv/lib/python3.9/site-packages (from matplotlib!=3.6.1,>=3.4->seaborn) (1.4.7) Requirement already satisfied: packaging>=20.0 in ./.venv/lib/python3.9/site-packages (from matplotlib!=3.6.1,>=3.4->seaborn) (25.0) Requirement already satisfied: pillow>=8 in ./.venv/lib/python3.9/site-packages (from matplotlib!=3.6.1,>=3.4->seaborn) (11.3.0) Requirement already satisfied: pyparsing>=2.3.1 in ./.venv/lib/python3.9/site-packages (from matplotlib!=3.6.1,>=3.4->seaborn) (3.2.3) Requirement already 
satisfied: python-dateutil>=2.7 in ./.venv/lib/python3.9/site-packages (from matplotlib!=3.6.1,>=3.4->seaborn) (2.9.0.post0) Requirement already satisfied: importlib-resources>=3.2.0 in ./.venv/lib/python3.9/site-packages (from matplotlib!=3.6.1,>=3.4->seaborn) (6.5.2) Requirement already satisfied: zipp>=3.1.0 in ./.venv/lib/python3.9/site-packages (from importlib-resources>=3.2.0->matplotlib!=3.6.1,>=3.4->seaborn) (3.23.0) Requirement already satisfied: pytz>=2020.1 in ./.venv/lib/python3.9/site-packages (from pandas>=1.2->seaborn) (2025.2) Requirement already satisfied: tzdata>=2022.7 in ./.venv/lib/python3.9/site-packages (from pandas>=1.2->seaborn) (2025.2) Requirement already satisfied: six>=1.5 in ./.venv/lib/python3.9/site-packages (from python-dateutil>=2.7->matplotlib!=3.6.1,>=3.4->seaborn) (1.17.0) Note: you may need to restart the kernel to use updated packages. Requirement already satisfied: tensorflow-gan in ./.venv/lib/python3.9/site-packages (2.1.0) Requirement already satisfied: tensorflow-hub>=0.2 in ./.venv/lib/python3.9/site-packages (from tensorflow-gan) (0.16.1) Requirement already satisfied: tensorflow-probability>=0.7 in ./.venv/lib/python3.9/site-packages (from tensorflow-gan) (0.25.0) Requirement already satisfied: numpy>=1.12.0 in ./.venv/lib/python3.9/site-packages (from tensorflow-hub>=0.2->tensorflow-gan) (2.0.2) Requirement already satisfied: protobuf>=3.19.6 in ./.venv/lib/python3.9/site-packages (from tensorflow-hub>=0.2->tensorflow-gan) (5.29.5) Requirement already satisfied: tf-keras>=2.14.1 in ./.venv/lib/python3.9/site-packages (from tensorflow-hub>=0.2->tensorflow-gan) (2.19.0) Requirement already satisfied: absl-py in ./.venv/lib/python3.9/site-packages (from tensorflow-probability>=0.7->tensorflow-gan) (2.3.1) Requirement already satisfied: six>=1.10.0 in ./.venv/lib/python3.9/site-packages (from tensorflow-probability>=0.7->tensorflow-gan) (1.17.0) Requirement already satisfied: decorator in 
./.venv/lib/python3.9/site-packages (from tensorflow-probability>=0.7->tensorflow-gan) (5.2.1) Requirement already satisfied: cloudpickle>=1.3 in ./.venv/lib/python3.9/site-packages (from tensorflow-probability>=0.7->tensorflow-gan) (3.1.1) Requirement already satisfied: gast>=0.3.2 in ./.venv/lib/python3.9/site-packages (from tensorflow-probability>=0.7->tensorflow-gan) (0.6.0) Requirement already satisfied: dm-tree in ./.venv/lib/python3.9/site-packages (from tensorflow-probability>=0.7->tensorflow-gan) (0.1.8) Requirement already satisfied: tensorflow<2.20,>=2.19 in ./.venv/lib/python3.9/site-packages (from tf-keras>=2.14.1->tensorflow-hub>=0.2->tensorflow-gan) (2.19.0) Requirement already satisfied: astunparse>=1.6.0 in ./.venv/lib/python3.9/site-packages (from tensorflow<2.20,>=2.19->tf-keras>=2.14.1->tensorflow-hub>=0.2->tensorflow-gan) (1.6.3) Requirement already satisfied: flatbuffers>=24.3.25 in ./.venv/lib/python3.9/site-packages (from tensorflow<2.20,>=2.19->tf-keras>=2.14.1->tensorflow-hub>=0.2->tensorflow-gan) (25.2.10) Requirement already satisfied: google-pasta>=0.1.1 in ./.venv/lib/python3.9/site-packages (from tensorflow<2.20,>=2.19->tf-keras>=2.14.1->tensorflow-hub>=0.2->tensorflow-gan) (0.2.0) Requirement already satisfied: libclang>=13.0.0 in ./.venv/lib/python3.9/site-packages (from tensorflow<2.20,>=2.19->tf-keras>=2.14.1->tensorflow-hub>=0.2->tensorflow-gan) (18.1.1) Requirement already satisfied: opt-einsum>=2.3.2 in ./.venv/lib/python3.9/site-packages (from tensorflow<2.20,>=2.19->tf-keras>=2.14.1->tensorflow-hub>=0.2->tensorflow-gan) (3.4.0) Requirement already satisfied: packaging in ./.venv/lib/python3.9/site-packages (from tensorflow<2.20,>=2.19->tf-keras>=2.14.1->tensorflow-hub>=0.2->tensorflow-gan) (25.0) Requirement already satisfied: requests<3,>=2.21.0 in ./.venv/lib/python3.9/site-packages (from tensorflow<2.20,>=2.19->tf-keras>=2.14.1->tensorflow-hub>=0.2->tensorflow-gan) (2.32.4) Requirement already satisfied: setuptools in 
./.venv/lib/python3.9/site-packages (from tensorflow<2.20,>=2.19->tf-keras>=2.14.1->tensorflow-hub>=0.2->tensorflow-gan) (58.0.4) Requirement already satisfied: termcolor>=1.1.0 in ./.venv/lib/python3.9/site-packages (from tensorflow<2.20,>=2.19->tf-keras>=2.14.1->tensorflow-hub>=0.2->tensorflow-gan) (3.1.0) Requirement already satisfied: typing-extensions>=3.6.6 in ./.venv/lib/python3.9/site-packages (from tensorflow<2.20,>=2.19->tf-keras>=2.14.1->tensorflow-hub>=0.2->tensorflow-gan) (4.14.1) Requirement already satisfied: wrapt>=1.11.0 in ./.venv/lib/python3.9/site-packages (from tensorflow<2.20,>=2.19->tf-keras>=2.14.1->tensorflow-hub>=0.2->tensorflow-gan) (1.17.2) Requirement already satisfied: grpcio<2.0,>=1.24.3 in ./.venv/lib/python3.9/site-packages (from tensorflow<2.20,>=2.19->tf-keras>=2.14.1->tensorflow-hub>=0.2->tensorflow-gan) (1.73.1) Requirement already satisfied: tensorboard~=2.19.0 in ./.venv/lib/python3.9/site-packages (from tensorflow<2.20,>=2.19->tf-keras>=2.14.1->tensorflow-hub>=0.2->tensorflow-gan) (2.19.0) Requirement already satisfied: keras>=3.5.0 in ./.venv/lib/python3.9/site-packages (from tensorflow<2.20,>=2.19->tf-keras>=2.14.1->tensorflow-hub>=0.2->tensorflow-gan) (3.10.0) Requirement already satisfied: h5py>=3.11.0 in ./.venv/lib/python3.9/site-packages (from tensorflow<2.20,>=2.19->tf-keras>=2.14.1->tensorflow-hub>=0.2->tensorflow-gan) (3.14.0) Requirement already satisfied: ml-dtypes<1.0.0,>=0.5.1 in ./.venv/lib/python3.9/site-packages (from tensorflow<2.20,>=2.19->tf-keras>=2.14.1->tensorflow-hub>=0.2->tensorflow-gan) (0.5.1) Requirement already satisfied: tensorflow-io-gcs-filesystem>=0.23.1 in ./.venv/lib/python3.9/site-packages (from tensorflow<2.20,>=2.19->tf-keras>=2.14.1->tensorflow-hub>=0.2->tensorflow-gan) (0.37.1) Requirement already satisfied: charset_normalizer<4,>=2 in ./.venv/lib/python3.9/site-packages (from requests<3,>=2.21.0->tensorflow<2.20,>=2.19->tf-keras>=2.14.1->tensorflow-hub>=0.2->tensorflow-gan) (3.4.2) 
Requirement already satisfied: idna<4,>=2.5 in ./.venv/lib/python3.9/site-packages (from requests<3,>=2.21.0->tensorflow<2.20,>=2.19->tf-keras>=2.14.1->tensorflow-hub>=0.2->tensorflow-gan) (3.10) Requirement already satisfied: urllib3<3,>=1.21.1 in ./.venv/lib/python3.9/site-packages (from requests<3,>=2.21.0->tensorflow<2.20,>=2.19->tf-keras>=2.14.1->tensorflow-hub>=0.2->tensorflow-gan) (2.5.0) Requirement already satisfied: certifi>=2017.4.17 in ./.venv/lib/python3.9/site-packages (from requests<3,>=2.21.0->tensorflow<2.20,>=2.19->tf-keras>=2.14.1->tensorflow-hub>=0.2->tensorflow-gan) (2025.7.14) Requirement already satisfied: markdown>=2.6.8 in ./.venv/lib/python3.9/site-packages (from tensorboard~=2.19.0->tensorflow<2.20,>=2.19->tf-keras>=2.14.1->tensorflow-hub>=0.2->tensorflow-gan) (3.8.2) Requirement already satisfied: tensorboard-data-server<0.8.0,>=0.7.0 in ./.venv/lib/python3.9/site-packages (from tensorboard~=2.19.0->tensorflow<2.20,>=2.19->tf-keras>=2.14.1->tensorflow-hub>=0.2->tensorflow-gan) (0.7.2) Requirement already satisfied: werkzeug>=1.0.1 in ./.venv/lib/python3.9/site-packages (from tensorboard~=2.19.0->tensorflow<2.20,>=2.19->tf-keras>=2.14.1->tensorflow-hub>=0.2->tensorflow-gan) (3.1.3) Requirement already satisfied: wheel<1.0,>=0.23.0 in ./.venv/lib/python3.9/site-packages (from astunparse>=1.6.0->tensorflow<2.20,>=2.19->tf-keras>=2.14.1->tensorflow-hub>=0.2->tensorflow-gan) (0.45.1) Requirement already satisfied: rich in ./.venv/lib/python3.9/site-packages (from keras>=3.5.0->tensorflow<2.20,>=2.19->tf-keras>=2.14.1->tensorflow-hub>=0.2->tensorflow-gan) (14.0.0) Requirement already satisfied: namex in ./.venv/lib/python3.9/site-packages (from keras>=3.5.0->tensorflow<2.20,>=2.19->tf-keras>=2.14.1->tensorflow-hub>=0.2->tensorflow-gan) (0.1.0) Requirement already satisfied: optree in ./.venv/lib/python3.9/site-packages (from keras>=3.5.0->tensorflow<2.20,>=2.19->tf-keras>=2.14.1->tensorflow-hub>=0.2->tensorflow-gan) (0.16.0) Requirement 
already satisfied: importlib-metadata>=4.4 in ./.venv/lib/python3.9/site-packages (from markdown>=2.6.8->tensorboard~=2.19.0->tensorflow<2.20,>=2.19->tf-keras>=2.14.1->tensorflow-hub>=0.2->tensorflow-gan) (8.7.0) Requirement already satisfied: zipp>=3.20 in ./.venv/lib/python3.9/site-packages (from importlib-metadata>=4.4->markdown>=2.6.8->tensorboard~=2.19.0->tensorflow<2.20,>=2.19->tf-keras>=2.14.1->tensorflow-hub>=0.2->tensorflow-gan) (3.23.0) Requirement already satisfied: MarkupSafe>=2.1.1 in ./.venv/lib/python3.9/site-packages (from werkzeug>=1.0.1->tensorboard~=2.19.0->tensorflow<2.20,>=2.19->tf-keras>=2.14.1->tensorflow-hub>=0.2->tensorflow-gan) (3.0.2) Requirement already satisfied: markdown-it-py>=2.2.0 in ./.venv/lib/python3.9/site-packages (from rich->keras>=3.5.0->tensorflow<2.20,>=2.19->tf-keras>=2.14.1->tensorflow-hub>=0.2->tensorflow-gan) (3.0.0) Requirement already satisfied: pygments<3.0.0,>=2.13.0 in ./.venv/lib/python3.9/site-packages (from rich->keras>=3.5.0->tensorflow<2.20,>=2.19->tf-keras>=2.14.1->tensorflow-hub>=0.2->tensorflow-gan) (2.19.2) Requirement already satisfied: mdurl~=0.1 in ./.venv/lib/python3.9/site-packages (from markdown-it-py>=2.2.0->rich->keras>=3.5.0->tensorflow<2.20,>=2.19->tf-keras>=2.14.1->tensorflow-hub>=0.2->tensorflow-gan) (0.1.2) Note: you may need to restart the kernel to use updated packages. Requirement already satisfied: tqdm in ./.venv/lib/python3.9/site-packages (4.67.1) Note: you may need to restart the kernel to use updated packages.
import numpy as np
import tensorflow as tf
import os
from glob import glob

# Reproducibility: seed both NumPy's and TensorFlow's RNGs.
seed = 111
np.random.seed(seed)
tf.random.set_seed(seed)

# Locations of the two unpaired image domains.
DATA_DIR = "data"
MONET_DIR = os.path.join(DATA_DIR, "monet_jpg")
PHOTO_DIR = os.path.join(DATA_DIR, "photo_jpg")


def _list_jpgs(folder):
    """Return every *.jpg path in `folder`, sorted for determinism."""
    return sorted(glob(os.path.join(folder, "*.jpg")))


monet_paths = _list_jpgs(MONET_DIR)
photo_paths = _list_jpgs(PHOTO_DIR)

print(f"Monet images: {len(monet_paths)}")
print(f"Photo images: {len(photo_paths)}")
/Users/mikebrozowski/monet-gan/.venv/lib/python3.9/site-packages/urllib3/__init__.py:35: NotOpenSSLWarning: urllib3 v2 only supports OpenSSL 1.1.1+, currently the 'ssl' module is compiled with 'LibreSSL 2.8.3'. See: https://github.com/urllib3/urllib3/issues/3020 warnings.warn(
Monet images: 300 Photo images: 7038
Problem Description¶
This project is a submission for the Kaggle competition: https://www.kaggle.com/competitions/gan-getting-started
In this project, I explore the use of Generative Adversarial Networks (GANs) to translate real-world photographs into artistic paintings in the style of Monet. This task is a form of image-to-image translation, where the goal is to learn a mapping from one domain (photos) to another (Monet-style paintings) using deep learning.
The core objective is to train a model that takes a natural photo as input and outputs a generated image that mimics the Monet painting style. This is a typical application of CycleGANs, which are designed to work with unpaired datasets — meaning we do not have exact photo-painting pairs.
Dataset Description¶
The dataset consists of two unpaired image domains:
- data/monet_jpg/: Contains 300 JPG images of paintings by Claude Monet.
- data/photo_jpg/: Contains 7038 JPG images of real-world landscape photographs.
There is no one-to-one mapping between Monet images and photos. Instead, the model must learn to capture the distribution of Monet-style images and use it to transform photo images accordingly.
All images are RGB and vary in content but generally depict similar scenery: landscapes, nature, and outdoor settings — making them well-suited for style transfer.
import matplotlib.pyplot as plt
from PIL import Image


def show_images(image_paths, title, n=5):
    """Display the first `n` images from `image_paths` in a single row."""
    plt.figure(figsize=(15, 3))
    for idx in range(n):
        plt.subplot(1, n, idx + 1)
        plt.imshow(Image.open(image_paths[idx]))
        plt.axis('off')
    plt.suptitle(title, fontsize=16)
    plt.show()


# Preview a few examples from each domain.
show_images(monet_paths, "Sample Monet Paintings")
show_images(photo_paths, "Sample Real-World Photos")
Strategy Based on EDA¶
EDA confirms the two domains are similar in content but different in style. The lack of paired images makes CycleGAN the natural choice of approach.
The plan:
- Preprocess and normalize the images.
- Train and compare three CycleGAN variants:
- Baseline (9-block ResNet with partial InstanceNorm)
- Full InstanceNorm CycleGAN
- Lightweight (6-block ResNet) CycleGAN
- Evaluate generated Monet-style images qualitatively and quantitatively.
- Select the best model and tune hyperparameters such as learning rate, loss weights, and number of ResNet blocks.
import tensorflow as tf

# Pipeline constants.
IMG_SIZE = 256        # every image is resized to IMG_SIZE x IMG_SIZE
BATCH_SIZE = 16
AUTOTUNE = tf.data.AUTOTUNE


def load_and_preprocess_image(filename):
    """Read a JPG from disk, resize it, and scale pixels to [-1, 1]."""
    raw = tf.io.read_file(filename)
    decoded = tf.image.decode_jpeg(raw, channels=3)
    resized = tf.image.resize(decoded, [IMG_SIZE, IMG_SIZE])
    # tanh-compatible range: 0..255 -> -1..1
    return tf.cast(resized, tf.float32) / 127.5 - 1.0
def build_dataset(image_paths, shuffle=True):
    """Build a batched tf.data input pipeline from a list of JPG paths.

    Args:
        image_paths: list of image file paths.
        shuffle: if True, reshuffle the examples each epoch.

    Returns:
        A tf.data.Dataset yielding float32 batches of shape
        (BATCH_SIZE, IMG_SIZE, IMG_SIZE, 3) with values in [-1, 1].
    """
    ds = tf.data.Dataset.from_tensor_slices(image_paths)
    # Fix: shuffle the (cheap) path strings BEFORE decoding. The original
    # shuffled after map, so a buffer of len(image_paths) held fully
    # decoded 256x256 float images — effectively the entire dataset in
    # memory. A buffer of strings is tiny and yields the same random order.
    if shuffle:
        ds = ds.shuffle(buffer_size=len(image_paths))
    ds = ds.map(load_and_preprocess_image, num_parallel_calls=AUTOTUNE)
    ds = ds.batch(BATCH_SIZE).prefetch(AUTOTUNE)
    return ds


# Build datasets
photo_ds = build_dataset(photo_paths)
monet_ds = build_dataset(monet_paths)
Comparison of Architectures and Hyperparameters¶
To identify the most effective architecture for photo-to-Monet translation, we train and evaluate the following models:
Architecture Variants¶
Baseline CycleGAN
- 9 ResNet blocks
- Instance normalization only in residual layers
- Serves as a control model
CycleGAN with Full Instance Normalization
- Applies InstanceNorm in all layers, including initial and final layers
- Expected to better capture artistic style and improve training stability
Lightweight CycleGAN
- 6 ResNet blocks instead of 9
- Reduces training time and model complexity
- Tests whether a lighter model can maintain quality
Hyperparameter Tuning¶
Once the best architecture is chosen, we tune:
- Learning Rate: Tested values include `2e-4`, `1e-4`, and `5e-5`
- Cycle vs Identity Loss Weights: Varying the contribution of cycle and identity loss terms
- Number of Residual Blocks: Experimenting with 6, 7, and 9 blocks
We evaluate results using:
- Visual inspection of generated images
- Training stability and loss curves
- Using a CNN model to predict whether an image is generated or originally a Monet image, using accuracy as the metric
These experiments help determine which model and configuration best capture Monet’s style without distorting content.
import tensorflow as tf
from tensorflow.keras.layers import Layer
from tensorflow.keras.initializers import Ones, Zeros


class InstanceNormalization(Layer):
    """Instance normalization layer.

    Normalizes each feature map of each sample independently over its
    spatial axes (H, W), then applies a learned per-channel affine
    transform (gamma * x_hat + beta). Standard in style-transfer
    generators, where per-image statistics should not leak across the
    batch the way BatchNorm's do.
    """

    def __init__(self, epsilon=1e-5, **kwargs):
        super().__init__(**kwargs)
        self.epsilon = epsilon  # numerical-stability term added to variance
        self.gamma_initializer = Ones()
        self.beta_initializer = Zeros()

    def build(self, input_shape):
        # One learned scale/shift pair per channel (channels-last input).
        channels = input_shape[-1]
        self.gamma = self.add_weight(
            shape=(channels,),
            name='gamma',
            initializer=self.gamma_initializer,
            trainable=True
        )
        self.beta = self.add_weight(
            shape=(channels,),
            name='beta',
            initializer=self.beta_initializer,
            trainable=True
        )

    def call(self, inputs):
        # Per-sample, per-channel statistics over the spatial axes only.
        mean, variance = tf.nn.moments(inputs, axes=[1, 2], keepdims=True)
        inv = tf.math.rsqrt(variance + self.epsilon)
        normalized = (inputs - mean) * inv
        return self.gamma[None, None, None, :] * normalized + self.beta[None, None, None, :]

    def get_config(self):
        # Fix: expose `epsilon` in the layer config. Without this, saving
        # and reloading a model (Keras calls get_config/from_config) would
        # silently rebuild the layer with the default epsilon.
        config = super().get_config()
        config.update({'epsilon': self.epsilon})
        return config
from tensorflow.keras import layers


def resnet_block(x, filters, norm_layer):
    """Residual block: two 3x3 same-padded convs with normalization,
    ReLU between them, and the input added back at the end."""
    skip = x
    out = layers.Conv2D(filters, 3, padding='same')(x)
    out = norm_layer()(out)
    out = layers.Activation('relu')(out)
    out = layers.Conv2D(filters, 3, padding='same')(out)
    out = norm_layer()(out)
    return layers.add([out, skip])
from tensorflow.keras import models


def build_generator(img_shape=(256, 256, 3), n_blocks=9, norm_layer=InstanceNormalization):
    """ResNet-style CycleGAN generator.

    Layout: 7x7 conv to 64 channels, two stride-2 downsampling convs
    (128, 256), `n_blocks` residual blocks at 256 channels, two stride-2
    transposed convs back up (128, 64), and a final 7x7 tanh conv to RGB.
    Output is in [-1, 1], matching the input normalization.
    """
    inputs = layers.Input(shape=img_shape)

    # Initial wide-receptive-field convolution.
    x = layers.Conv2D(64, 7, padding='same')(inputs)
    x = norm_layer()(x)
    x = layers.Activation('relu')(x)

    # Downsampling path.
    for filters in (128, 256):
        x = layers.Conv2D(filters, 3, strides=2, padding='same')(x)
        x = norm_layer()(x)
        x = layers.Activation('relu')(x)

    # Residual core.
    for _ in range(n_blocks):
        x = resnet_block(x, 256, norm_layer)

    # Upsampling path back to the input resolution.
    for filters in (128, 64):
        x = layers.Conv2DTranspose(filters, 3, strides=2, padding='same')(x)
        x = norm_layer()(x)
        x = layers.Activation('relu')(x)

    # Project to RGB in [-1, 1].
    outputs = layers.Conv2D(3, 7, padding='same', activation='tanh')(x)
    return models.Model(inputs, outputs)
def build_discriminator(img_shape=(256, 256, 3)):
    """PatchGAN-style discriminator.

    Stacked stride-2 4x4 convs with LeakyReLU (and InstanceNorm after the
    first) ending in a 1-channel logit map — no sigmoid, since training
    uses an MSE (LSGAN) objective on the raw patch scores.
    """
    inputs = layers.Input(shape=img_shape)

    x = layers.Conv2D(64, 4, strides=2, padding='same')(inputs)
    x = layers.LeakyReLU(0.2)(x)

    for filters in (128, 256, 512):
        x = layers.Conv2D(filters, 4, strides=2, padding='same')(x)
        x = InstanceNormalization()(x)
        x = layers.LeakyReLU(0.2)(x)

    logits = layers.Conv2D(1, 4, padding='same')(x)
    return models.Model(inputs, logits)
class CycleGANTrainer(tf.Module):
    """Bundles the four CycleGAN networks and runs one optimization step
    per call.

    G: photo -> Monet generator.      F: Monet -> photo generator.
    DX: discriminator on photos.      DY: discriminator on Monets.

    Uses LSGAN (MSE) adversarial losses, an L1 cycle-consistency term
    weighted by `lambda_cycle`, and an L1 identity term weighted by 0.5.
    """

    def __init__(self, G, F, DX, DY, g_opt, f_opt, dx_opt, dy_opt, lambda_cycle=10):
        super().__init__()
        self.G = G
        self.F = F
        self.DX = DX
        self.DY = DY
        self.g_opt = g_opt
        self.f_opt = f_opt
        self.dx_opt = dx_opt
        self.dy_opt = dy_opt
        self.lambda_cycle = lambda_cycle
        self.loss_obj = tf.keras.losses.MeanSquaredError()

    def discriminator_loss(self, real, fake):
        """Real patches should score 1, fakes 0; halved so the
        discriminators learn at half the generators' rate (standard
        CycleGAN trick)."""
        real_loss = self.loss_obj(tf.ones_like(real), real)
        fake_loss = self.loss_obj(tf.zeros_like(fake), fake)
        return (real_loss + fake_loss) * 0.5

    def generator_loss(self, fake):
        """The generator wants its fakes scored as real (1)."""
        return self.loss_obj(tf.ones_like(fake), fake)

    def cycle_loss(self, real, cycled):
        """L1 error between an image and its round-trip reconstruction."""
        return tf.reduce_mean(tf.abs(real - cycled))

    @tf.function
    def __call__(self, real_x, real_y):
        """Run one training step on a photo batch (real_x) and a Monet
        batch (real_y), updating all four networks."""
        with tf.GradientTape(persistent=True) as tape:
            # Forward cycles: x -> G -> y' -> F -> x'' and y -> F -> x' -> G -> y''.
            fake_y = self.G(real_x, training=True)
            cycled_x = self.F(fake_y, training=True)
            fake_x = self.F(real_y, training=True)
            cycled_y = self.G(fake_x, training=True)
            # Identity mappings: each generator fed its own target domain.
            same_x = self.F(real_x, training=True)
            same_y = self.G(real_y, training=True)

            dx_real = self.DX(real_x, training=True)
            dx_fake = self.DX(fake_x, training=True)
            dy_real = self.DY(real_y, training=True)
            dy_fake = self.DY(fake_y, training=True)

            g_loss = self.generator_loss(dy_fake)
            f_loss = self.generator_loss(dx_fake)
            total_cycle_loss = self.cycle_loss(real_x, cycled_x) + self.cycle_loss(real_y, cycled_y)
            total_cycle_loss *= self.lambda_cycle
            g_loss += total_cycle_loss + self.cycle_loss(real_y, same_y) * 0.5
            f_loss += total_cycle_loss + self.cycle_loss(real_x, same_x) * 0.5
            dx_loss = self.discriminator_loss(dx_real, dx_fake)
            dy_loss = self.discriminator_loss(dy_real, dy_fake)

        g_grads = tape.gradient(g_loss, self.G.trainable_variables)
        f_grads = tape.gradient(f_loss, self.F.trainable_variables)
        dx_grads = tape.gradient(dx_loss, self.DX.trainable_variables)
        dy_grads = tape.gradient(dy_loss, self.DY.trainable_variables)
        # A persistent tape holds the forward activations until released.
        del tape

        self.g_opt.apply_gradients(zip(g_grads, self.G.trainable_variables))
        self.f_opt.apply_gradients(zip(f_grads, self.F.trainable_variables))
        self.dx_opt.apply_gradients(zip(dx_grads, self.DX.trainable_variables))
        self.dy_opt.apply_gradients(zip(dy_grads, self.DY.trainable_variables))

    def train(self, photo_ds, monet_ds, epochs):
        """Alternate batches from both domains for `epochs` passes.

        Bug fix: iterators are re-created at the start of every epoch.
        The original created them once before the epoch loop, so the
        smaller (Monet) iterator was exhausted after the first epoch and
        any run with epochs > 1 raised StopIteration.
        """
        steps_per_epoch = min(len(photo_ds), len(monet_ds))
        for epoch in range(epochs):
            print(f"Epoch {epoch+1}/{epochs}")
            photo_iter = iter(photo_ds)
            monet_iter = iter(monet_ds)
            for _ in range(steps_per_epoch):
                self(next(photo_iter), next(monet_iter))
# Standalone loss helpers (module-level mirrors of the trainer's losses).
loss_fn = tf.keras.losses.MeanSquaredError()


def discriminator_loss(real, fake):
    """LSGAN discriminator loss: real patches toward 1, fakes toward 0,
    halved to slow the discriminator relative to the generators."""
    return (loss_fn(tf.ones_like(real), real) + loss_fn(tf.zeros_like(fake), fake)) * 0.5


def generator_loss(fake):
    """LSGAN generator loss: push fake patch scores toward 1."""
    return loss_fn(tf.ones_like(fake), fake)


def cycle_consistency_loss(real, cycled, lamb=10):
    """Weighted L1 distance between an image and its round-trip reconstruction."""
    return lamb * tf.reduce_mean(tf.abs(real - cycled))


def identity_loss(real, same, lamb=5):
    """Weighted L1 distance between an image and its identity mapping."""
    return lamb * tf.reduce_mean(tf.abs(real - same))
# --- Baseline model: 9 ResNet blocks, partial InstanceNorm ---

def _make_adam():
    """Fresh Adam optimizer with the CycleGAN-standard lr and beta_1."""
    return tf.keras.optimizers.Adam(2e-4, beta_1=0.5)


# One optimizer per network so their moment estimates stay independent.
g_opt_base = _make_adam()
f_opt_base = _make_adam()
dx_opt_base = _make_adam()
dy_opt_base = _make_adam()

G_base = build_generator(n_blocks=9)
F_base = build_generator(n_blocks=9)
DX_base = build_discriminator()
DY_base = build_discriminator()

trainer_base = CycleGANTrainer(G_base, F_base, DX_base, DY_base,
                               g_opt_base, f_opt_base, dx_opt_base, dy_opt_base)
trainer_base.train(photo_ds, monet_ds, epochs=1)
Epoch 1/1
from PIL import Image
import os


def denormalize_img(tensor):
    """Map a [-1, 1] float tensor back to uint8 pixels in [0, 255]."""
    scaled = (tensor + 1.0) * 127.5
    return tf.cast(tf.clip_by_value(scaled, 0, 255), tf.uint8)


def save_image_batch(batch_tensor, filenames, output_dir):
    """Write each image in the batch to `output_dir` under its paired filename."""
    os.makedirs(output_dir, exist_ok=True)
    for tensor, name in zip(batch_tensor, filenames):
        pixels = denormalize_img(tensor).numpy()
        Image.fromarray(pixels).save(os.path.join(output_dir, name))
# Translate one batch of real photos with the baseline generator.
sample_batch = next(iter(photo_ds))
output_batch = G_base(sample_batch, training=False)

# Filenames: photo_0000.jpg, photo_0001.jpg, ...
filenames = [f"photo_{i:04d}.jpg" for i in range(len(output_batch))]

# Write the translated batch to disk.
save_image_batch(output_batch, filenames, output_dir="outputs/baseline")
# --- Full-InstanceNorm variant: InstanceNorm in every layer ---

# Networks
G_full = build_generator(n_blocks=9, norm_layer=InstanceNormalization)
F_full = build_generator(n_blocks=9, norm_layer=InstanceNormalization)
DX_full = build_discriminator()
DY_full = build_discriminator()

# Fresh optimizers so no state carries over from the baseline run.
g_opt_full = tf.keras.optimizers.Adam(2e-4, beta_1=0.5)
f_opt_full = tf.keras.optimizers.Adam(2e-4, beta_1=0.5)
dx_opt_full = tf.keras.optimizers.Adam(2e-4, beta_1=0.5)
dy_opt_full = tf.keras.optimizers.Adam(2e-4, beta_1=0.5)

# Train for a single epoch.
trainer_full = CycleGANTrainer(G_full, F_full, DX_full, DY_full,
                               g_opt_full, f_opt_full, dx_opt_full, dy_opt_full)
trainer_full.train(photo_ds, monet_ds, epochs=1)

# Save a translated sample batch for later comparison.
sample_batch = next(iter(photo_ds))
output_batch = G_full(sample_batch, training=False)
filenames = [f"photo_{i:04d}.jpg" for i in range(len(output_batch))]
save_image_batch(output_batch, filenames, output_dir="outputs/instancenorm")
Epoch 1/1
# --- Lightweight variant: 6 ResNet blocks instead of 9 ---

# Networks
G_light = build_generator(n_blocks=6, norm_layer=InstanceNormalization)
F_light = build_generator(n_blocks=6, norm_layer=InstanceNormalization)
DX_light = build_discriminator()
DY_light = build_discriminator()

# Fresh optimizers so no state carries over from the previous runs.
g_opt_light = tf.keras.optimizers.Adam(2e-4, beta_1=0.5)
f_opt_light = tf.keras.optimizers.Adam(2e-4, beta_1=0.5)
dx_opt_light = tf.keras.optimizers.Adam(2e-4, beta_1=0.5)
dy_opt_light = tf.keras.optimizers.Adam(2e-4, beta_1=0.5)

# Train for a single epoch.
trainer_light = CycleGANTrainer(G_light, F_light, DX_light, DY_light,
                                g_opt_light, f_opt_light, dx_opt_light, dy_opt_light)
trainer_light.train(photo_ds, monet_ds, epochs=1)

# Save a translated sample batch for later comparison.
sample_batch = next(iter(photo_ds))
output_batch = G_light(sample_batch, training=False)
filenames = [f"photo_{i:04d}.jpg" for i in range(len(output_batch))]
save_image_batch(output_batch, filenames, output_dir="outputs/lightweight")
Epoch 1/1
import matplotlib.pyplot as plt
def show_cycle_results(photo_batch, g_model, title):
    """Plot a batch of photos (top row) and their Monet translations (bottom row).

    Args:
        photo_batch: batch of images scaled to [-1, 1] (assumed (B, H, W, 3) — confirm upstream).
        g_model: photo-to-Monet generator, called with training=False.
        title: figure-level title string.
    """
    monet_batch = g_model(photo_batch, training=False)
    n = len(photo_batch)
    # squeeze=False keeps `axes` 2-D even when the batch holds a single image,
    # so axes[row, col] indexing never breaks.
    fig, axes = plt.subplots(2, n, figsize=(15, 5), squeeze=False)
    for i in range(n):
        # Images are in [-1, 1]; rescale to [0, 1] for imshow.
        axes[0, i].imshow((photo_batch[i].numpy() + 1) / 2)
        axes[0, i].axis('off')
        axes[1, i].imshow((monet_batch[i].numpy() + 1) / 2)
        axes[1, i].axis('off')
    # axis('off') hides the whole axis, including ylabels, so the original
    # set_ylabel("Input"/"Monet") calls never rendered. Draw the row labels
    # as free text just left of the first column instead.
    axes[0, 0].text(-0.08, 0.5, "Input", transform=axes[0, 0].transAxes,
                    rotation=90, va="center", fontsize=14)
    axes[1, 0].text(-0.08, 0.5, "Monet", transform=axes[1, 0].transAxes,
                    rotation=90, va="center", fontsize=14)
    fig.suptitle(title, fontsize=16)
    plt.tight_layout()
    plt.show()
# Rebuild the photo dataset and grab a single batch so every generator
# is compared on exactly the same inputs.
photo_ds = build_dataset(photo_paths)
sample_batch = next(iter(photo_ds))

# Render the same batch through each trained generator.
for generator, caption in [
    (G_base, "Base Model Translation"),
    (G_full, "Full Model Translation"),
    (G_light, "Lightweight Model Translation"),
]:
    show_cycle_results(sample_batch, generator, caption)
Model Decision¶
The lightweight model produced images with little visible difference from the base or full models. Given its faster training and inference time, it is the best choice for generating the Monet-like images.
import os
from PIL import Image
import numpy as np
def save_images(model, dataset, out_dir, num_images=100):
    """Translate `num_images` randomly sampled photos with `model` and save them as JPEGs.

    Args:
        model: generator callable; expects a batched image tensor, called with training=False.
        dataset: batched tf.data.Dataset of photos scaled to [-1, 1] (assumed — confirm upstream).
        out_dir: output directory, created if missing.
        num_images: number of images to generate; sampling is with replacement,
            so the same source photo may be translated more than once.
    """
    os.makedirs(out_dir, exist_ok=True)
    ds_list = list(dataset.unbatch().as_numpy_iterator())
    for i in range(num_images):
        # BUG FIX: sample an index over the unbatched image list. The original
        # used len(dataset), which is the number of *batches* in the batched
        # dataset, so only a small prefix of the images was ever sampled.
        image_index = np.random.randint(0, len(ds_list))
        img = ds_list[image_index]
        img = tf.expand_dims(img, axis=0)  # add batch dimension for the generator
        gen_img = model(img, training=False)[0]
        gen_img = ((gen_img.numpy() + 1) * 127.5).astype("uint8")  # [-1,1] → [0,255]
        Image.fromarray(gen_img).save(os.path.join(out_dir, f"{i}.jpg"))
# Save 300 generated (fake) Monet-style images to "fake_monet".
# NOTE: only the fakes are written here; the real Monet images are read
# directly from data/monet_jpg when the classifier dataset is built below.
save_images(G_light, photo_ds, "fake_monet", num_images=300)
Test Against a CNN¶
Now that the images have been generated, I'll test them against a CNN classifier to see how easily they can be told apart from real Monets. This doesn't help train the GAN, but it shows whether the generated images are convincing enough that a simple CNN struggles to tell which images are Monet's and which are generated. I expect even a simple CNN to distinguish the two labels with high accuracy.
import tensorflow as tf
import os
def load_labeled_dataset(dir_path, label):
    """Build a tf.data.Dataset of (image, label) pairs from every JPEG in `dir_path`.

    Each image is decoded as 3-channel RGB, resized to 256x256, and scaled
    to [0, 1]; every element carries the same scalar float32 `label`.
    """
    jpeg_paths = tf.io.gfile.glob(os.path.join(dir_path, '*.jpg'))
    path_ds = tf.data.Dataset.from_tensor_slices(jpeg_paths)

    def _decode(path):
        raw = tf.io.read_file(path)
        decoded = tf.image.decode_jpeg(raw, channels=3)
        resized = tf.image.resize(decoded, [256, 256])
        scaled = resized / 255.0
        return scaled, tf.constant(label, dtype=tf.float32)

    return path_ds.map(_decode)
# Label real Monets 1 and generated ones 0; keep 300 of each so the classes are balanced.
real_ds = load_labeled_dataset("data/monet_jpg", 1).take(300)
fake_ds = load_labeled_dataset("fake_monet", 0).take(300)
# Combine both classes, shuffle, and batch for classifier training.
dataset = real_ds.concatenate(fake_ds).shuffle(512).batch(32)
def build_binary_classifier():
    """Return a small CNN that scores a 256x256 RGB image as real (1) or generated (0)."""
    model = tf.keras.Sequential()
    model.add(tf.keras.layers.Input(shape=(256, 256, 3)))
    # Two conv/pool stages, then a small dense head with a sigmoid output.
    model.add(tf.keras.layers.Conv2D(32, 3, activation='relu'))
    model.add(tf.keras.layers.MaxPooling2D())
    model.add(tf.keras.layers.Conv2D(64, 3, activation='relu'))
    model.add(tf.keras.layers.MaxPooling2D())
    model.add(tf.keras.layers.Flatten())
    model.add(tf.keras.layers.Dense(64, activation='relu'))
    model.add(tf.keras.layers.Dense(1, activation='sigmoid'))
    return model
# Train the real-vs-fake classifier for a few epochs on the balanced dataset.
clf = build_binary_classifier()
clf.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
clf.fit(dataset, epochs=3)
Epoch 1/3 19/19 ━━━━━━━━━━━━━━━━━━━━ 4s 166ms/step - accuracy: 0.5509 - loss: 2.1954 Epoch 2/3 19/19 ━━━━━━━━━━━━━━━━━━━━ 3s 168ms/step - accuracy: 0.9336 - loss: 0.2030 Epoch 3/3 19/19 ━━━━━━━━━━━━━━━━━━━━ 3s 167ms/step - accuracy: 0.9875 - loss: 0.0266
<keras.src.callbacks.history.History at 0x30a98e3a0>
import matplotlib.pyplot as plt
def show_predictions(model, dataset, num_images=10):
    """Visualize classifier verdicts on one batch of images.

    Args:
        model: binary classifier whose predict() returns a real-probability per image.
        dataset: batched tf.data.Dataset of (images, labels) pairs.
        num_images: number of images to display (clamped to the batch size).
    """
    for images, labels in dataset.take(1):
        preds = model.predict(images).flatten()
        # Don't index past the batch we actually received.
        count = min(num_images, len(images))
        cols = 5
        # The grid was hard-coded to 2x5, which raised for num_images > 10;
        # derive the row count from the requested image count instead.
        rows = (count + cols - 1) // cols
        plt.figure(figsize=(15, 3 * rows))
        for i in range(count):
            plt.subplot(rows, cols, i + 1)
            plt.imshow(images[i])
            plt.title(f"{'Real' if preds[i] > 0.5 else 'Fake'}\nScore: {preds[i]:.2f}")
            plt.axis("off")
        plt.tight_layout()
        plt.show()
show_predictions(clf, dataset)
1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 55ms/step
import numpy as np
from sklearn.metrics import confusion_matrix, accuracy_score
# Collect ground-truth labels and classifier probabilities over the full dataset.
y_true = []
y_probs = []
for batch_images, batch_labels in dataset:
    batch_probs = clf.predict(batch_images).squeeze()
    y_probs.extend(batch_probs)
    y_true.extend(batch_labels.numpy())

# Threshold the probabilities at 0.5 to obtain hard class predictions.
y_true = np.array(y_true)
y_pred = (np.array(y_probs) > 0.5).astype(int)

# Summarize performance with overall accuracy and the confusion matrix.
acc = accuracy_score(y_true, y_pred)
cm = confusion_matrix(y_true, y_pred)
print(f"Accuracy: {acc:.2%}")
print("Confusion Matrix:")
print(cm)
1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 54ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 54ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 53ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 53ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 53ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 55ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 56ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 54ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 53ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 54ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 55ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 55ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 53ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 53ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 53ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 53ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 53ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 53ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 53ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 61ms/step Accuracy: 99.50% Confusion Matrix: [[297 3] [ 0 300]]
The following cell was used to generate the images and compress them into a zip file for submission to the Kaggle competition.
# import os
# from tensorflow.keras.preprocessing import image
# from tqdm import tqdm
# import zipfile
# # 1. Load test photos
# test_folder = "fake_monet" # folder of photos you need to translate
# test_paths = sorted([os.path.join(test_folder, fname)
# for fname in os.listdir(test_folder) if fname.endswith(".jpg")])
# # 2. Generate and save Monet-style images
# os.makedirs("generated_monet_test", exist_ok=True)
# for idx, img_path in enumerate(tqdm(test_paths)):
# img = image.load_img(img_path, target_size=(256, 256))
# arr = image.img_to_array(img) / 127.5 - 1
# gen = G_full(tf.expand_dims(arr, 0), training=False)[0]
# # NOTE(review): elsewhere generator output in [-1, 1] is denormalized via
# # (x + 1) * 127.5; the original (x * 127.5) would produce wrong pixel values.
# # Also, the chosen model was G_light, not G_full — confirm before reuse.
# gen = ((gen.numpy() + 1) * 127.5).astype("uint8")
# fname = f"{os.path.splitext(os.path.basename(img_path))[0]}.jpg"
# image.array_to_img(gen).save(os.path.join("generated_monet_test", fname))
# # 3. Create images.zip
# with zipfile.ZipFile("images.zip", "w") as z:
# for fname in os.listdir("generated_monet_test"):
# z.write(os.path.join("generated_monet_test", fname),
# arcname=fname)